mov r16=-1
(isBP) br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
+#ifndef XEN
+ // XEN: stack is allocated in xenheap, which is currently always
+ // mapped.
// load mapping for stack (virtaddr in r2, physaddr in r3)
rsm psr.ic
movl r17=PAGE_KERNEL
ssm psr.ic
srlz.d
;;
-
+#endif
+
.load_current:
// load the "current" pointer (r13) and ar.k6 with the current task
#if defined(XEN) && defined(VALIDATE_VT)
ipi_data = (delivery_mode << 8) | (vector & 0xff);
ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
+#ifdef XEN
+ printf ("send_ipi to %d (%x)\n", cpu, phys_cpu_id);
+#endif
writeq(ipi_data, ipi_addr);
}
*/
if (smp_processor_id() == 0) {
#ifdef XEN
- cpu_data = alloc_xenheap_pages(PERCPU_PAGE_SHIFT -
- PAGE_SHIFT + get_order(NR_CPUS));
+ cpu_data = alloc_xenheap_pages(get_order(NR_CPUS
+ * PERCPU_PAGE_SIZE));
#else
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
#endif
+void __init
#ifdef XEN
early_setup_arch (char **cmdline_p)
#else
ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
*cmdline_p = __va(ia64_boot_param->command_line);
-#ifdef XEN
- efi_init();
-#else
+#ifndef XEN
strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+#endif
efi_init();
io_port_init();
-#endif
#ifdef CONFIG_IA64_GENERIC
{
#ifdef XEN
early_cmdline_parse(cmdline_p);
cmdline_parse(*cmdline_p);
-#undef CONFIG_ACPI_BOOT
#endif
if (early_console_setup(*cmdline_p) == 0)
mark_bsp_online();
+#ifdef XEN
+}
+
+void __init
+late_setup_arch (char **cmdline_p)
+{
+#endif
#ifdef CONFIG_ACPI_BOOT
/* Initialize the ACPI boot-time table parser */
acpi_table_init();
#ifndef XEN
find_memory();
-#else
- io_port_init();
-}
-
-void __init
-late_setup_arch (char **cmdline_p)
-{
-#undef CONFIG_ACPI_BOOT
- acpi_table_init();
#endif
+
/* process SAL system table: */
ia64_sal_init(efi.sal_systab);
#ifdef CONFIG_SMP
+#ifdef XEN
+ init_smp_config ();
+#endif
+
cpu_physical_id(0) = hard_smp_processor_id();
cpu_set(0, cpu_sibling_map[0]);
cpu_data = per_cpu_init();
+#ifdef XEN
+ printf ("cpu_init: current=%p, current->domain->arch.mm=%p\n",
+ current, current->domain->arch.mm);
+#endif
+
/*
* We set ar.k3 so that assembly code in MCA handler can compute
* physical addresses of per cpu variables with a simple:
#ifndef XEN
pm_idle = default_idle;
#endif
+
+#ifdef XEN
+ /* surrender usage of kernel registers to domain, use percpu area instead */
+ __get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE);
+ __get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA);
+ __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK);
+ __get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER);
+ __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT);
+ __get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE);
+#endif
}
void
//Huh? This seems to be used on ia64 even if !CONFIG_SMP
void smp_send_event_check_mask(cpumask_t mask)
{
- printf("smp_send_event_check_mask called\n");
- //dummy();
- //send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
+ int cpu;
+
+ /* Not for me. */
+ cpu_clear(smp_processor_id(), mask);
+ if (cpus_empty(mask))
+ return;
+
+ printf("smp_send_event_check_mask called\n");
+
+ for (cpu = 0; cpu < NR_CPUS; ++cpu)
+ if (cpu_isset(cpu, mask))
+ platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
send_IPI_single(smp_processor_id(), op);
}
+#ifndef XEN
/*
 * Called with preemption disabled.
 */
{
platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
+#endif
void
smp_flush_tlb_all (void)
if (wait)
atomic_set(&data.finished, 0);
- printk("smp_call_function: about to spin_lock \n");
spin_lock(&call_lock);
- printk("smp_call_function: done with spin_lock \n");
+#if 0 //def XEN
+ printk("smp_call_function: %d lock\n", smp_processor_id ());
+#endif
call_data = &data;
mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
- printk("smp_call_function: about to send_IPI \n");
send_IPI_allbutself(IPI_CALL_FUNC);
- printk("smp_call_function: done with send_IPI \n");
/* Wait for response */
while (atomic_read(&data.started) != cpus)
cpu_relax();
call_data = NULL;
- printk("smp_call_function: about to spin_unlock \n");
spin_unlock(&call_lock);
+#if 0 //def XEN
printk("smp_call_function: DONE WITH spin_unlock, returning \n");
+#endif
return 0;
}
EXPORT_SYMBOL(smp_call_function);
do_rest:
task_for_booting_cpu = c_idle.idle;
+#else
+ struct domain *idle;
+ struct vcpu *v;
+ void *stack;
+
+ if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
+ panic("failed 'createdomain' for CPU %d", cpu);
+ set_bit(_DOMF_idle_domain, &idle->domain_flags);
+ v = idle->vcpu[0];
+
+ printf ("do_boot_cpu: cpu=%d, domain=%p, vcpu=%p\n", cpu, idle, v);
+
+ task_for_booting_cpu = v;
+
+ /* Set cpu number. */
+ get_thread_info(v)->cpu = cpu;
#endif
Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
#ifdef CONFIG_ACPI_BOOT
#define ACPI_MAX_PLATFORM_INTERRUPTS 256
+#define NR_IOSAPICS 4
#if 0
/* Array to record platform interrupt vectors for generic interrupt routing. */
struct acpi_table_madt * acpi_madt __initdata;
static u8 has_8259;
-#if 0
static int __init
acpi_parse_lapic_addr_ovr (
acpi_table_entry_header *header, const unsigned long end)
acpi_table_print_madt_entry(header);
+#if 0
iosapic_init(iosapic->address, iosapic->global_irq_base);
+#endif
return 0;
}
-
static int __init
acpi_parse_plat_int_src (
acpi_table_entry_header *header, const unsigned long end)
acpi_table_print_madt_entry(header);
+#if 0
/*
* Get vector assignment for this interrupt, set attributes,
* and program the IOSAPIC routing table.
(plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
platform_intr_list[plintsrc->type] = vector;
+#endif
return 0;
}
acpi_table_print_madt_entry(header);
+#if 0
iosapic_override_isa_irq(p->bus_irq, p->global_irq,
(p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
(p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+#endif
return 0;
}
-
static int __init
acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
{
*/
sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
+#if 0
/*Start cyclone clock*/
cyclone_setup(0);
+#endif
}
}
#else
has_8259 = acpi_madt->flags.pcat_compat;
#endif
+#if 0
iosapic_system_init(has_8259);
+#endif
/* Get base address of IPI Message Block */
return 0;
}
-#endif
#ifdef CONFIG_ACPI_NUMA
return acpi_register_irq(gsi, polarity, trigger);
}
EXPORT_SYMBOL(acpi_register_gsi);
+#endif
static int __init
acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
{
if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
acpi_legacy_devices = 1;
+#if 0
acpi_register_gsi(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
+#endif
return 0;
}
-#endif
unsigned long __init
acpi_find_rsdp (void)
return rsdp_phys;
}
-#if 0
int __init
acpi_boot_init (void)
{
printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
return 0;
}
+#if 0
int
acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
{
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
+#include <asm/hw_irq.h>
//#include <asm/mpspec.h>
#include <xen/irq.h>
#include <xen/event.h>
//free_page((unsigned long)d->mm.perdomain_pt);
}
-int hlt_counter;
-
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-
static void default_idle(void)
{
- if ( hlt_counter == 0 )
- {
+ int cpu = smp_processor_id();
local_irq_disable();
- if ( !softirq_pending(smp_processor_id()) )
+ if ( !softirq_pending(cpu))
safe_halt();
- //else
- local_irq_enable();
- }
+ local_irq_enable();
}
-void continue_cpu_idle_loop(void)
+static void continue_cpu_idle_loop(void)
{
int cpu = smp_processor_id();
for ( ; ; )
{
+ printf ("idle%dD\n", cpu);
#ifdef IA64
// __IRQ_STAT(cpu, idle_timestamp) = jiffies
#else
#endif
while ( !softirq_pending(cpu) )
default_idle();
+ add_preempt_count(SOFTIRQ_OFFSET);
raise_softirq(SCHEDULE_SOFTIRQ);
do_softirq();
+ sub_preempt_count(SOFTIRQ_OFFSET);
}
}
void startup_cpu_idle_loop(void)
{
+ int cpu = smp_processor_id ();
/* Just some sanity to ensure that the scheduler is set up okay. */
ASSERT(current->domain == IDLE_DOMAIN_ID);
+ printf ("idle%dA\n", cpu);
raise_softirq(SCHEDULE_SOFTIRQ);
+#if 0 /* All this work is done within continue_cpu_idle_loop */
+ printf ("idle%dB\n", cpu);
+ asm volatile ("mov ar.k2=r0");
do_softirq();
+ printf ("idle%dC\n", cpu);
/*
* Declares CPU setup done to the boot processor.
* Therefore memory barrier to ensure state is visible.
*/
smp_mb();
+#endif
#if 0
//do we have to ensure the idle task has a shared page so that, for example,
//region registers can be loaded from it. Apparently not...
v->arch.breakimm = d->arch.breakimm;
d->arch.sys_pgnr = 0;
- d->arch.mm = xmalloc(struct mm_struct);
- if (unlikely(!d->arch.mm)) {
- printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
- return -ENOMEM;
- }
- memset(d->arch.mm, 0, sizeof(*d->arch.mm));
- d->arch.mm->pgd = pgd_alloc(d->arch.mm);
- if (unlikely(!d->arch.mm->pgd)) {
- printk("Can't allocate pgd for domain %d\n",d->domain_id);
- return -ENOMEM;
- }
+ if (d->domain_id != IDLE_DOMAIN_ID) {
+ d->arch.mm = xmalloc(struct mm_struct);
+ if (unlikely(!d->arch.mm)) {
+ printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
+ return -ENOMEM;
+ }
+ memset(d->arch.mm, 0, sizeof(*d->arch.mm));
+ d->arch.mm->pgd = pgd_alloc(d->arch.mm);
+ if (unlikely(!d->arch.mm->pgd)) {
+ printk("Can't allocate pgd for domain %d\n",d->domain_id);
+ return -ENOMEM;
+ }
+ } else
+ d->arch.mm = NULL;
+ printf ("arch_do_create_domain: domain=%p\n", d);
}
void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
return 0;
}
+#include <xen/sched-if.h>
+
+extern struct schedule_data schedule_data[NR_CPUS];
+
void schedule_tail(struct vcpu *next)
{
unsigned long rr7;
//printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
//printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
+
+ // TG: Real HACK FIXME.
+ // This is currently necessary because when a new domain is started,
+ // the context_switch function of xen/common/schedule.c(__enter_scheduler)
+ // never returns. Therefore, the lock must be released.
+ // schedule_tail is only called when a domain is started.
+ spin_unlock_irq(&schedule_data[current->processor].schedule_lock);
+
/* rr7 will be postponed to last point when resuming back to guest */
if(VMX_DOMAIN(current)){
vmx_load_all_rr(current);
int
xen_do_IRQ(ia64_vector vector)
{
- if (vector != 0xef) {
+ if (vector != IA64_TIMER_VECTOR && vector != IA64_IPI_VECTOR) {
extern void vcpu_pend_interrupt(void *, int);
#if 0
if (firsttime[vector]) {
return(0);
}
-/* From linux/kernel/softirq.c */
-#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
-# define invoke_softirq() __do_softirq()
-#else
-# define invoke_softirq() do_softirq()
-#endif
-
/*
* Exit an interrupt context. Process softirqs if needed and possible:
*/
void irq_exit(void)
{
//account_system_vtime(current);
- //sub_preempt_count(IRQ_EXIT_OFFSET);
- if (!in_interrupt() && local_softirq_pending())
- invoke_softirq();
+ sub_preempt_count(IRQ_EXIT_OFFSET);
+ if (!in_interrupt() && local_softirq_pending()) {
+ add_preempt_count(SOFTIRQ_OFFSET);
+ do_softirq();
+ sub_preempt_count(SOFTIRQ_OFFSET);
+ }
//preempt_enable_no_resched();
}
/* end from linux/kernel/softirq.c */
unsigned long context_switch_count = 0;
+#include <asm/vcpu.h>
+
void context_switch(struct vcpu *prev, struct vcpu *next)
{
//printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
//prev->domain->domain_id,(long)prev&0xffffff,next->domain->domain_id,(long)next&0xffffff);
//if (prev->domain->domain_id == 1 && next->domain->domain_id == 0) cs10foo();
//if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo();
-//printk("@@sw %d->%d\n",prev->domain->domain_id,next->domain->domain_id);
+printk("@@sw%d/%x %d->%d\n",smp_processor_id(), hard_smp_processor_id (),
+ prev->domain->domain_id,next->domain->domain_id);
if(VMX_DOMAIN(prev)){
vtm_domain_out(prev);
}
printk("About to call scheduler_init()\n");
scheduler_init();
local_irq_disable();
+ init_IRQ ();
printk("About to call init_xen_time()\n");
init_xen_time(); /* initialise the time */
printk("About to call ac_timer_init()\n");
ac_timer_init();
-// init_xen_time(); ???
#ifdef CONFIG_SMP
if ( opt_nosmp )
//BUG_ON(!local_irq_is_enabled());
+ /* Enable IRQ to receive IPI (needed for ITC sync). */
+ local_irq_enable();
+
printk("num_online_cpus=%d, max_cpus=%d\n",num_online_cpus(),max_cpus);
for_each_present_cpu ( i )
{
}
}
+ local_irq_disable();
+
printk("Brought up %ld CPUs\n", (long)num_online_cpus());
smp_cpus_done(max_cpus);
#endif
-
- // FIXME: Should the following be swapped and moved later?
- schedulers_start();
do_initcalls();
printk("About to call sort_main_extable()\n");
sort_main_extable();
- /* surrender usage of kernel registers to domain, use percpu area instead */
- __get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE);
- __get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA);
- __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK);
- __get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER);
- __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT);
- __get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE);
/* Create initial domain 0. */
printk("About to call do_createdomain()\n");
0,
0) != 0)
panic("Could not set up DOM0 guest OS\n");
+
+ /* PIN domain0 on CPU 0. */
+ dom0->vcpu[0]->cpumap=1;
+ set_bit(_VCPUF_cpu_pinned, &dom0->vcpu[0]->vcpu_flags);
+
#ifdef CLONE_DOMAIN0
{
int i;
domain_unpause_by_systemcontroller(clones[i]);
}
#endif
- domain_unpause_by_systemcontroller(dom0);
domain0_ready = 1;
+
local_irq_enable();
+
+ printf("About to call schedulers_start dom0=%p, idle0_dom=%p\n",
+ dom0, &idle0_domain);
+ schedulers_start();
+
+ domain_unpause_by_systemcontroller(dom0);
+
printk("About to call startup_cpu_idle_loop()\n");
startup_cpu_idle_loop();
}
#ifdef HEARTBEAT_FREQ
static long count = 0;
if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
- printf("Heartbeat... iip=%p,psr.i=%d,pend=%d\n",
- regs->cr_iip,
+ printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
+ regs->cr_iip /*,
VCPU(current,interrupt_delivery_enabled),
- VCPU(current,pending_interruption));
+ VCPU(current,pending_interruption) */);
count = 0;
}
#endif
#ifdef CONFIG_XEN_SMP
#define CONFIG_SMP 1
-#define NR_CPUS 2
-#define CONFIG_NR_CPUS 2
+#define NR_CPUS 8
+#define CONFIG_NR_CPUS 8
#else
#undef CONFIG_SMP
#define NR_CPUS 1
#ifdef CONFIG_SMP
#warning "Lots of things to fix to enable CONFIG_SMP!"
#endif
-// FIXME SMP
-#define get_cpu() 0
+#define get_cpu() smp_processor_id()
#define put_cpu() do {} while(0)
// needed for common/dom0_ops.c until hyperthreading is supported
// function calls; see decl in xen/include/xen/sched.h
#undef free_task_struct
#undef alloc_task_struct
+#define get_thread_info(v) alloc_thread_info(v)
// initial task has a different name in Xen
//#define idle0_task init_task
#endif /* __XEN_IA64_CONFIG_H__ */
// needed for include/xen/smp.h
+#ifdef CONFIG_SMP
+#define __smp_processor_id() current_thread_info()->cpu
+#else
#define __smp_processor_id() 0
+#endif
// FOLLOWING ADDED FOR XEN POST-NGIO and/or LINUX 2.6.7
#include <asm/intrinsics.h>
#include <asm/system.h>
+#define DEBUG_SPINLOCK
+
typedef struct {
volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
unsigned int break_lock;
#endif
+#ifdef DEBUG_SPINLOCK
+ void *locker;
+#endif
#ifdef XEN
unsigned char recurse_cpu;
unsigned char recurse_cnt;
: "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#endif
+
+#ifdef DEBUG_SPINLOCK
+ asm volatile ("mov %0=ip" : "=r" (lock->locker));
+#endif
}
#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
-#ifdef XEN
-#define in_interrupt() 0 // FIXME SMP LATER
-#else
#define in_interrupt() (irq_count())
-#endif
#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
#define save_and_cli(x) save_and_cli(&x)
#endif /* CONFIG_SMP */
+#ifndef XEN
/* SoftIRQ primitives. */
#define local_bh_disable() \
do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
extern void local_bh_enable(void);
+#endif
/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
frequency threaded job scheduling. For almost all the purposes